Research Notebook for 300118 -- Demonstration of $q$-step Specific Entropy Rate and Jupyter Notebook

Objectives

  • Demonstrate the estimation of the normalized $q$-step specific entropy rate using the nlpl module.

Code

In [47]:
# Use the interactive nbagg backend so the 3D plots and the animation
# below render (and stay interactive) inside the notebook.
%matplotlib nbagg

Load in the required Python packages.

In [48]:
import numpy
numpy.random.seed(1)  # Fix the seed so the dithering noise below is reproducible.

import matplotlib.pyplot as plt
import matplotlib
import matplotlib.cm as cmx

import sdeint

import sys

# Make the parent directory importable so the local nlpl / load_models
# modules resolve when this notebook runs from a subdirectory.
sys.path.append('..')

# NOTE(review): wildcard imports pull in choose_model_order_nlpl,
# estimate_ser_insample, estimate_normalized_qstep_insample, embed_ts,
# load_model_data, ... -- consider explicit imports for readability.
from nlpl import *

from load_models import *

from mpl_toolkits.mplot3d import Axes3D

# LaTeX-rendered labels with a serif font for publication-style figures.
from matplotlib import rc
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size = 18)

Set parameters for data length, inference, etc.

In [49]:
# Largest autoregressive model order p considered during model selection.
p_max = 10
#p_max = 30

# Number of time points to load / simulate.
N = 10000

# Which data set / model to analyze; exactly one should be uncommented.
# The eeg/meg options also require roi_src, and gcp requires
# gcp_sensor_ind (set on the same line when uncommented).
# model_name = 'nanopore'
# model_name = 'snanopore'

# model_name = 'lorenz'
model_name = 'slorenz'

# model_name = 'nanopore_iei'
# model_name = 'snanopore_iei'

# model_name = 'eeg'; roi_src = 10
# model_name = 'meg'; roi_src = 30

# model_name = 'gcp'; gcp_sensor_ind = 0 # Max is 26

# Nearest-neighbor backend for the NLPL estimator.
nn_package = 'sklearn' # Deterministic, but slower.
# nn_package = 'pyflann' # Fast approximation, but non-deterministic.

# Determines the upper bound on the number of nearest neighbors to use
# for the computation of NLPL:

# pow_upperbound = 0.9
pow_upperbound = 0.5
# pow_upperbound = 0.75

# Determines the number of nearest neighbors to use in estimating 
# the specific entropy rate and normalized q-step specific entropy
# rate.

pow_neighbors = 0.5

Load in data

In [50]:
# Load the time series for the chosen model. EEG/MEG data come as
# multiple realizations and need a source ROI; GCP needs a sensor index;
# everything else is a single realization.
if model_name in ('eeg', 'meg'):
    x, p_true, model_type = load_model_data(model_name, N, roi_src = roi_src)
    is_multirealization = True
elif model_name == 'gcp':
    x, p_true, model_type = load_model_data(model_name, N, gcp_sensor_ind = gcp_sensor_ind)
    is_multirealization = False
else:
    x, p_true, model_type = load_model_data(model_name, N)
    is_multirealization = False

# Dither the (quantized) nanopore series with uniform noise on [-5, 5).
if model_name == 'nanopore':
    x = x + (numpy.random.rand(x.shape[0]) - 0.5)*(1e1)

# Analyze log inter-event intervals for the *_iei models.
if 'iei' in model_name:
    x = numpy.log(x)

Estimate the model order for q = 0 horizon

In [51]:
# Sweep model orders p = 0..p_max and choose the p* minimizing the
# negative log-predictive likelihood (NLPL); also returns the
# entropy-rate estimate er_knn and its pointwise values ler_knn.
p_opt, nlpl_opt, nlpl_by_p, er_knn, ler_knn = choose_model_order_nlpl(x, p_max, pow_upperbound = pow_upperbound, nn_package = nn_package, is_multirealization = is_multirealization, announce_stages = False, output_verbose = True)

# Parenthesized print: emits identical text under Python 2 and Python 3.
print('Chose p* = {}, giving ER(p*) = {}'.format(p_opt, er_knn))
For p = 0, with NLPL(k = 5) = 3.42956755523
####################################################
# Warning: For p = 1, Nelder-Mead is choosing k* near k_upper = 100.
# Increase pow_upperbound.
####################################################
For p = 1, with NLPL(h* = 0.152463030016, k* = 100) = 2.58380811332
For p = 2, with NLPL(h* = 0.0740218800312, k* = 23) = 1.51126390824
For p = 3, with NLPL(h* = 0.0730067100157, k* = 15) = 1.43525974921
For p = 4, with NLPL(h* = 0.0732502388292, k* = 12) = 1.41918847958
For p = 5, with NLPL(h* = 0.075612829648, k* = 10) = 1.42572543276
For p = 6, with NLPL(h* = 0.0808677838642, k* = 8) = 1.44798317533
For p = 7, with NLPL(h* = 0.0863596916242, k* = 8) = 1.49636896105
For p = 8, with NLPL(h* = 0.0929564366175, k* = 7) = 1.53774151127
For p = 9, with NLPL(h* = 0.0969250932381, k* = 7) = 1.57706441841
For p = 10, with NLPL(h* = 0.101424066305, k* = 7) = 1.62218838757
Chose p* = 4, giving ER(p*) = 1.40142749813
In [52]:
# NLPL as a function of model order, with the selected p* marked in red.
fig, ax = plt.subplots(figsize = (10, 5))
ax.plot(range(0, p_max + 1), nlpl_by_p)
ax.set_xlabel('Model Order $p$')
ax.set_ylabel('NLPL$(p)$')
ax.axvline(p_opt, color = 'r')
fig.tight_layout()

Estimate the specific entropy rate

In [53]:
# Parenthesized print: emits identical text under Python 2 and Python 3.
print('Estimating specific entropy rate in-sample with q = 0...')

# Pointwise specific entropy rate, reusing the local entropy-rate
# estimates (ler_knn) from the model-selection step.
ser = estimate_ser_insample(x, ler_knn, p_opt = p_opt, pow_neighbors = pow_neighbors)
Estimating specific entropy rate in-sample with q = 0...
In [54]:
# Two stacked panels on a shared time axis: the series itself, and the
# estimated specific entropy rate. The first p_opt steps have no full
# history, so the SER trace is front-padded with NaNs to stay aligned.
fig, axes = plt.subplots(2, 1, sharex = True, figsize = (10, 5))
top, bottom = axes
top.plot(x)
top.set_ylabel('$X_{t}$')
bottom.plot([numpy.nan]*p_opt + ser.tolist())
bottom.set_ylabel('$\\widehat{{H}}^{{S}}(X_{{t-{}}}^{{t-1}})$'.format(p_opt))
bottom.set_xlabel('Time Step $t$ (au)')
fig.tight_layout()

Estimate the normalized q-step specific entropy rate

In [55]:
q = 5  # Prediction horizon (steps ahead) for the normalized q-step rate.

# Parenthesized print: emits identical text under Python 2 and Python 3.
print('Estimating the predictive divergence in-sample with q = {}...'.format(q))

kl_q = estimate_normalized_qstep_insample(x, p_opt, q, pow_neighbors = pow_neighbors)
Estimating the predictive divergence in-sample with q = 5...
In [56]:
# Compare time series, SER, and normalized q-step SER for the full time series.

def plot_series_ser_klq(x, ser, kl_q, p_opt, q, xlim = None):
    """Plot the series, SER, and normalized q-step SER in three stacked panels.

    The estimate traces are front-padded with NaNs (the first p_opt time
    steps lack a complete history). When xlim is given, the shared time
    axis is restricted to that window; otherwise the full series is shown.

    Returns the (fig, ax) pair so callers can customize further.
    """
    fig, ax = plt.subplots(3, 1, sharex = True, figsize = (10, 5))
    ax[0].plot(x)
    ax[0].set_ylabel('$X_{t}$')
    ax[1].plot([numpy.nan]*p_opt + ser.tolist())
    ax[1].set_ylabel('$\\widehat{{H}}^{{S}}(x_{{t-{}}}^{{t-1}})$'.format(p_opt))
    ax[2].plot([numpy.nan]*p_opt + kl_q.tolist())
    #ax[2].set_ylim([0, numpy.max(kl_q)])
    ax[2].set_ylabel('$\\widehat{{\\widetilde{{H}}}}^{{S}}(X_{{t-{}}}^{{t-1}}; {})$'.format(p_opt, q))
    ax[2].set_xlabel('Time Step $t$ (au)')
    if xlim is not None:
        ax[2].set_xlim(xlim)
    plt.tight_layout()
    return fig, ax

# Compare time series, SER, and normalized q-step SER for the full time series.
plot_series_ser_klq(x, ser, kl_q, p_opt, q)

# Compare time series, SER, and normalized q-step SER for a snippet of the time series.
plot_series_ser_klq(x, ser, kl_q, p_opt, q, xlim = [0, 200])
In [57]:
# Pad the estimates with NaNs so they align with the original time axis:
# the first p_opt points lack a complete history, and the last q points
# of the q-step measure lack a complete future horizon.
ser_w_nan = [numpy.nan]*p_opt + ser.tolist()
kl_q_w_nan = [numpy.nan]*p_opt + kl_q.tolist() + [numpy.nan]*q

# Time-axis window used for the overlay figures below.
fig_range = [0, 200]

def uniformize(x):
    """Linearly rescale x onto [0, 1], ignoring NaNs.

    Parameters
    ----------
    x : array-like
        Numeric sequence; may be a plain Python list (e.g. the
        NaN-padded ser_w_nan / kl_q_w_nan) or a numpy array. NaNs are
        ignored when computing the range and propagate to the output.

    Returns
    -------
    numpy.ndarray
        (x - nanmin(x)) / (nanmax(x) - nanmin(x)); all zeros when the
        non-NaN values are constant (avoids a 0/0 division).
    """
    # Coerce to a float array: the original `x - a` fails with a
    # TypeError when callers pass plain lists, as they do below.
    x = numpy.asarray(x, dtype = float)

    a = numpy.nanmin(x); b = numpy.nanmax(x)

    if b == a:  # Degenerate (constant) input: map everything to 0.
        return numpy.zeros_like(x)

    return (x - a)/float(b - a)

# Overlay the (rescaled-to-[0,1]) time series and each measure, drawing a
# thin green segment between the two traces at every time step so their
# pointwise relationship is visible.
for measure_name, measure in zip(['Specific Entropy Rate', 'Normalized $q$-step Specific Entropy Rate'], [ser_w_nan, kl_q_w_nan]):
    _uds = uniformize(x)        # series values rescaled to [0, 1]
    _udd = uniformize(measure)  # measure values rescaled to [0, 1]

    _s = range(len(x))
    _t = range(len(measure))
    tspan = range(len(measure))

    # Build None-separated coordinate lists: matplotlib breaks the line at
    # each None, so plt.plot(a, b) draws one disconnected segment per t.
    a = []
    b = []

    # zip order: s = time index of x, t = rescaled series value,
    # u = time index of measure, v = rescaled measure value. So `a`
    # collects x-coordinates (two indices) and `b` the y-coordinates
    # (series value, then measure value) of each connecting segment.
    for s, t, u, v in zip(_s, _uds, _t, _udd):
        a.append(s)
        a.append(u)
        a.append(None)
        b.append(t)
        b.append(v)
        b.append(None)

    plt.figure(figsize = (10, 5))
    plt.plot(tspan, _uds, color = 'blue', linewidth = 2)
    plt.plot(tspan, _udd, color = 'red', linewidth = 2)
    plt.plot(a, b, color = 'green', linewidth = 0.25)
    plt.xlim(fig_range)
    plt.title(measure_name, fontsize = 12)

Plot a projection of the reconstructed state space shaded by the estimated specific entropy rate or estimated normalized q-step specific entropy rate.

In [58]:
# Scatter a 3D delay embedding (x_t, x_{t+1}, x_{t+2}) of the series,
# colored by the corresponding measure value.
for measure_type in ['ser', 'kl_q']:
    if measure_type == 'ser':
        measure = ser_w_nan
        # NOTE(review): uniformize is applied to a plain list here --
        # confirm it accepts lists, not only numpy arrays.
        cs = uniformize(measure)
    elif measure_type == 'kl_q':
        measure = kl_q_w_nan
        cs = uniformize(measure)

    fig = plt.figure()
    colorsMap = 'jet'

    # Map measure values to colors over their non-NaN range.
    cm = plt.get_cmap(colorsMap)
    cNorm = matplotlib.colors.Normalize(vmin=numpy.nanmin(cs), vmax=numpy.nanmax(cs))
    scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=cm)
    ax = Axes3D(fig)
    # Offsets align each embedded point with the measure at the step it
    # predicts: cs[3:] skips the NaN padding at the start; for kl_q the
    # trailing q NaNs are dropped as well (cs[3:-q]).
    if measure_type == 'ser':
        ax.scatter(x[:-3], x[1:-2], x[2:-1], s = 10, c=scalarMap.to_rgba(cs[3:]), lw = 0)
    elif measure_type == 'kl_q':
        ax.scatter(x[:-(3 + q)], x[1:-(2 + q)], x[2:-(1 + q)], s = 10, c=scalarMap.to_rgba(cs[3:-q]), lw = 0)
    scalarMap.set_array(cs)
    fig.colorbar(scalarMap)
#     ax.set_xlim([-2, 2])
#     ax.set_ylim([-2, 2])
#     ax.set_zlim([-2, 2])

Plot a trajectory in the reconstructed state space.

In [61]:
from matplotlib import animation, rc
from IPython.display import HTML

# 3D delay embedding (p_max = 2 gives 3 coordinates per row).
#X_anim = embed_ts(x, p_max = 2)[::2, :]
X_anim = embed_ts(x, p_max = 2)

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot([X_anim[0, 0]], [X_anim[0, 1]], [X_anim[0, 2]])

# initialization function: plot the background of each frame
def init():
    """Reset the axes: fix limits to the data range and seed the trajectory."""
    ax.clear()
    ax.set_xlim([numpy.min(x), numpy.max(x)]); ax.set_ylim(numpy.min(x), numpy.max(x)); ax.set_zlim(numpy.min(x), numpy.max(x))
    ax.plot([X_anim[0, 0]], [X_anim[0, 1]], [X_anim[0, 2]], 'b')
    return (ax,)

def animate(i):
    """Frame i: extend the trajectory by one segment; the newest point is red."""
    # Frames are offset by 3 so the animation starts past the first
    # embedded points.
    n = i + 3
    ax.plot(X_anim[n:n+2, 0], X_anim[n:n+2, 1], X_anim[n:n+2, 2], 'b')
    ax.scatter(X_anim[n, 0], X_anim[n, 1], X_anim[n, 2], color = 'b')
    ax.scatter(X_anim[n+1, 0], X_anim[n+1, 1], X_anim[n+1, 2], color = 'r')
    return (ax,)

# blit=False: blitting is not supported for 3D axes.
anim = animation.FuncAnimation(fig, animate, init_func=init,
                               frames=200, interval=200, blit=False)

# Render as JavaScript-driven HTML (more portable than an H.264 video).
#HTML(anim.to_html5_video())
HTML(anim.to_jshtml())
Out[61]:


Once Loop Reflect

Reflection

  • TODO: record reflections on the demonstration (e.g. how the normalized $q$-step specific entropy rate tracks dynamical structure in the slorenz data).

Next Actions

  • TODO: list concrete next actions (e.g. repeat with the other model_name options, vary q, tune pow_upperbound per the k_upper warning).